Load libraries

# Load libraries, install if needed
library(tidyverse)
#> Warning: package 'tidyr' was built under R version 4.0.5
# tidylog prints the verbose "#>" messages after each dplyr/tidyr verb below
library(tidylog)
library(RCurl)
library(RColorBrewer)
#> Warning: package 'RColorBrewer' was built under R version 4.0.5
# sdmTMB fits all spatial/random-effect models in this script
library(sdmTMB)
# devtools provides source_url() used just below
library(devtools)
library(patchwork)
library(sf)
#> Warning: package 'sf' was built under R version 4.0.5
library(ggeffects)
library(visreg)
# boot provides inv.logit() used when back-transforming binomial predictions
library(boot)

# Source utm function
# NOTE(review): sourcing from the branch head is not reproducible — consider
# pinning a commit hash in the URL
source_url("https://raw.githubusercontent.com/maxlindmark/marine-litter/main/R/functions/lon-lat-utm.R")

# Continuous colors
# options(ggplot2.continuous.colour = "viridis")
# 
# # Discrete colors
# scale_colour_discrete <- function(...) {
#   scale_colour_brewer(palette = "Dark2")
# }
# 
# scale_fill_discrete <- function(...) {
#   scale_fill_brewer(palette = "Dark2")
# }

Make map plots

# Normally I'd source code for map plots, but this caused errors when ggplotting when knitting only, not when running the for loop in a fresh session... 
# change to url once we have the final one
#source("/Users/maxlindmark/Dropbox/Max work/R/marine-litter/R/functions/map-plot.R")
# Seem I have to run this to avoid error when knitting (the ggplotting inside the for loop)

# Specify map ranges: lon/lat bounding box (degrees) for the study area.
# Use `<-` for assignment (tidyverse style), one assignment per line.
ymin <- 54
ymax <- 60
xmin <- 2
xmax <- 21

# Medium-resolution European country polygons as an sf object
map_data <- rnaturalearth::ne_countries(
  scale = "medium",
  returnclass = "sf",
  continent = "europe")

# Crop the polygon for plotting and efficiency:
# st_bbox(map_data) # find the rough coordinates
# Switch off s2 spherical geometry so st_crop() uses planar (GEOS) operations;
# avoids s2 failures on not-quite-valid polygons in the country data
sf::sf_use_s2(FALSE)
#> Spherical geometry (s2) switched off

# Crop to the lon/lat bounding box defined above; the attribute-constancy
# warnings/messages from st_crop() are expected and therefore suppressed
swe_coast <- suppressWarnings(suppressMessages(
  st_crop(map_data,
          c(xmin = xmin, ymin = ymin, xmax = xmax, ymax = ymax))))

# Quick visual check of the cropped coastline (still in lon/lat)
ggplot(swe_coast) + 
  geom_sf()


# Transform our map into UTM 33 coordinates, which is the equal-area projection we fit in:
# NOTE(review): UTM is conformal rather than equal-area; distortion is small
# within a zone, but the description above overstates it
utm_zone33 <- 32633
swe_coast_proj <- sf::st_transform(swe_coast, crs = utm_zone33)

# Visual check of the projected coastline (meters, EPSG:32633)
ggplot(swe_coast_proj) + 
  geom_sf()


# Define plotting theme for main plot.
# Builds on theme_light() with a bottom legend, compact legend spacing,
# light facet strips, and no panel grid.
#
# Args:
#   base_size:   base font size passed through to theme_light()
#   base_family: base font family passed through to theme_light()
#
# Returns: a ggplot2 theme object.
theme_plot <- function(base_size = 11, base_family = "") {
  # Fix: forward the arguments instead of hard-coding 11 / "" — previously
  # base_size and base_family were silently ignored by the body.
  theme_light(base_size = base_size, base_family = base_family) +
    theme(
      axis.text = element_text(color = "grey5"),
      legend.position = "bottom",
      legend.key.height = unit(0.2, "cm"),
      legend.margin = margin(0, 0, 0, 0),
      legend.box.margin = margin(-5, -5, -5, -5),
      strip.background = element_rect(fill = "grey95"),
      strip.text = element_text(color = "grey10"),
      strip.text.x = element_text(margin = margin(b = 2, t = 2), color = "grey10", size = 10),
      panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
      panel.background = element_blank()
      #, axis.line = element_line(colour = "grey20"),
    )
}

# Make default base map plot
#sf::st_boundary(swe_coast_proj)
# Plot limits in projected coordinates (UTM33, meters). The raw numbers are
# the coastline layer's bounding box (see st_boundary above); the multipliers
# are hand-tuned nudges to trim whitespace around the coast.
xmin2 <- -61896.44*1.005
xmax2 <- 893074.5*0.91
ymin2 <- 5983578*1.025
ymax2 <- 6691902*0.99

# Default base map of the whole study area (UTM33 coordinates).
plot_map <-
  ggplot(swe_coast_proj) +
  xlim(xmin2, xmax2) +
  ylim(ymin2, ymax2) +
  labs(x = "Longitude", y = "Latitude") +
  geom_sf(size = 0.3) +
  theme_plot() +
  # Fix: theme() must come AFTER theme_plot(). theme_plot() is built on the
  # complete theme theme_light(), which replaces all prior theme settings, so
  # the rotated x-axis labels were being discarded before. This also matches
  # the order used in plot_map_west.
  theme(axis.text.x = element_text(angle = 90)) +
  NULL

# Base map zoomed in on the west coast (UTM33 coordinates).
# The axis limits are hand-tuned fractions of the full-map limits above;
# theme(axis.text.x = ...) is applied after theme_plot() so the rotated
# labels survive the complete-theme reset.
plot_map_west <-
  ggplot(swe_coast_proj) +
  geom_sf(size = 0.3) +
  xlim(200000, xmax2 * 0.45) +
  ylim(ymin2 * 1.015, ymax2 * 0.99) +
  labs(x = "Longitude", y = "Latitude") +
  theme_plot() +
  theme(axis.text.x = element_text(angle = 90))

Read data

# Read and make data long so that I can for loop through all categories
biom <- read_csv("data/west_coast_litter_biomass.csv") %>% 
  mutate(year_f = as.factor(year),
         quarter_f = as.factor(quarter),
         depth_sc = (depth - mean(depth)) / sd(depth)) %>% 
  rename(plastic = plast) %>% 
  pivot_longer(c(plastic, sup, fishery),
               names_to = "litter_category",
               values_to = "density") %>% 
  filter(litter_category %in% c("fishery", "plastic", "sup"))
#> Rows: 609 Columns: 23
#> ── Column specification ────────────────────────────────────────────────────────
#> Delimiter: ","
#> chr  (4): haul_id, ansse_area, trend_area, var
#> dbl (18): diverse, naturmaterial, plast, metall, glas, gummi, quarter, st_no...
#> lgl  (1): balse_area
#> 
#> ℹ Use `spec()` to retrieve the full column specification for this data.
#> ℹ Specify the column types or set `show_col_types = FALSE` to quiet this message.
#> mutate: new variable 'year_f' (factor) with 10 unique values and 0% NA
#> 
#>         new variable 'quarter_f' (factor) with 2 unique values and 0% NA
#> 
#>         new variable 'depth_sc' (double) with 125 unique values and 0% NA
#> 
#> rename: renamed one variable (plastic)
#> 
#> pivot_longer: reorganized (plastic, sup, fishery) into (litter_category, density) [was 609x26, now 1827x25]
#> 
#> filter: no rows removed

# Numbers (count) data: same structure and transformations as `biom`, plus a
# back-calculated count per haul for count-family models
num <- read_csv("data/west_coast_litter_numbers.csv") %>% 
  mutate(year_f = as.factor(year),
         quarter_f = as.factor(quarter),
         depth_sc = (depth - mean(depth)) /sd(depth)) %>% 
  rename(plastic = plast) %>% 
  pivot_longer(c(plastic, sup, fishery),
               names_to = "litter_category",
               values_to = "density") %>% 
  # Counts = density x swept area; presumably density is per km^2 —
  # TODO confirm units against the data documentation
  mutate(number = density * swept_area_km2) %>% 
  # NOTE(review): no-op — pivot_longer() already limited the categories
  filter(litter_category %in% c("fishery", "plastic", "sup"))
#> Rows: 609 Columns: 23
#> ── Column specification ────────────────────────────────────────────────────────
#> Delimiter: ","
#> chr  (4): haul_id, ansse_area, trend_area, var
#> dbl (18): diverse, naturmaterial, plast, metall, glas, gummi, quarter, st_no...
#> lgl  (1): balse_area
#> 
#> ℹ Use `spec()` to retrieve the full column specification for this data.
#> ℹ Specify the column types or set `show_col_types = FALSE` to quiet this message.
#> mutate: new variable 'year_f' (factor) with 10 unique values and 0% NA
#> 
#>         new variable 'quarter_f' (factor) with 2 unique values and 0% NA
#> 
#>         new variable 'depth_sc' (double) with 125 unique values and 0% NA
#> 
#> rename: renamed one variable (plastic)
#> 
#> pivot_longer: reorganized (plastic, sup, fishery) into (litter_category, density) [was 609x26, now 1827x25]
#> 
#> mutate: new variable 'number' (double) with 125 unique values and 0% NA
#> 
#> filter: no rows removed

# Presence/absence data for the rarer categories, from the biomass file
binom <- read_csv("data/west_coast_litter_biomass.csv") %>% 
  mutate(year_f = as.factor(year),
         quarter_f = as.factor(quarter),
         depth_sc = (depth - mean(depth)) / sd(depth)) %>% 
  # Translate Swedish column names to English category names
  rename(other = diverse,
         natural = naturmaterial,
         glass = glas,
         metal = metall,
         rubber = gummi) %>% 
  pivot_longer(c(other, natural, glass, metal, rubber),
               names_to = "litter_category",
               values_to = "density") %>% 
  # NOTE(review): no-op — the pivoted categories never include these three
  # ("filter: no rows removed")
  filter(!litter_category %in% c("fishery", "plastic", "sup")) %>% 
  # Binary response: 1 if any litter of the category in the haul, else 0
  mutate(present = ifelse(density == 0, 0, 1))
#> Rows: 609 Columns: 23
#> ── Column specification ────────────────────────────────────────────────────────
#> Delimiter: ","
#> chr  (4): haul_id, ansse_area, trend_area, var
#> dbl (18): diverse, naturmaterial, plast, metall, glas, gummi, quarter, st_no...
#> lgl  (1): balse_area
#> 
#> ℹ Use `spec()` to retrieve the full column specification for this data.
#> ℹ Specify the column types or set `show_col_types = FALSE` to quiet this message.
#> mutate: new variable 'year_f' (factor) with 10 unique values and 0% NA
#> 
#>         new variable 'quarter_f' (factor) with 2 unique values and 0% NA
#> 
#>         new variable 'depth_sc' (double) with 125 unique values and 0% NA
#> 
#> rename: renamed 5 variables (other, natural, metal, glass, rubber)
#> 
#> pivot_longer: reorganized (other, natural, metal, glass, rubber) into (litter_category, density) [was 609x26, now 3045x23]
#> 
#> filter: no rows removed
#> 
#> mutate: new variable 'present' (double) with 2 unique values and 0% NA

# Load pred grid
# Prediction grid for the west coast: one row per grid cell x year
pred_grid_west <- read_csv("data/pred_grid_west.csv") %>% 
  mutate(year_f = as.factor(year),
         quarter_f = as.factor(1),
         # Scale depth with the *data* (num) mean/sd so the covariate is on
         # the same scale the models were fitted with
         depth_sc = (depth - mean(num$depth)) / sd(num$depth)) %>% 
  # Rescale coordinates by 1000 — presumably km -> m to match the coordinate
  # units of the fitted data; TODO confirm
  mutate(X = X*1000,
         Y = Y*1000)
#> Rows: 42580 Columns: 4
#> ── Column specification ────────────────────────────────────────────────────────
#> Delimiter: ","
#> dbl (4): X, Y, year, depth
#> 
#> ℹ Use `spec()` to retrieve the full column specification for this data.
#> ℹ Specify the column types or set `show_col_types = FALSE` to quiet this message.
#> mutate: new variable 'year_f' (factor) with 10 unique values and 0% NA
#> 
#>         new variable 'quarter_f' (factor) with one unique value and 0% NA
#> 
#>         new variable 'depth_sc' (double) with 3,207 unique values and 0% NA
#> 
#> mutate: changed 42,580 values (100%) of 'X' (0 new NA)
#> 
#>         changed 42,580 values (100%) of 'Y' (0 new NA)

Fit models

For plastic, sup and fishery I have enough data to fit spatial models with both biomass density (Tweedie) and numbers (Poisson). For the other categories, I will only fit presence/absence models, because the observations in those categories are so sparse that the data are essentially presence/absence anyway.

# https://haakonbakkagit.github.io/btopic104.html
# https://haakonbakkagit.github.io/btopic114.html

# Rule-of-thumb mesh resolution from the spatial extent of the data
# NOTE(review): max.edge and cutoff are computed but never used —
# make_mesh() below hard-codes cutoff = 3 (in the units of X/Y) instead of
# the derived `cutoff`; confirm which was intended
max.edge <- mean(c(diff(range(num$X)), diff(range(num$Y)))) / 15
cutoff <- max.edge/5

# Any single category suffices to build the mesh: all categories are
# long-format copies of the same hauls, so the locations are identical
mesh <- make_mesh(num %>% filter(litter_category == "sup"), c("X", "Y"), cutoff = 3)
#> filter: removed 1,218 rows (67%), 609 rows remaining
plot(mesh)

Test model to see whether spatial random effects are needed


    # dd <- biom %>%
    #     filter(litter_category == "fishery") %>%
    #     droplevels()
    # 
    # m0 <- sdmTMB(
    #   data = dd,
    #   formula = density ~ 0 + year_f + depth_sc,
    #   mesh = mesh,
    #   family = tweedie(),
    #   spatial = "off",
    #   time = "year",
    #   spatiotemporal = "off"
    #   )
    # 
    # dd$m0_resids <- residuals(m0) # randomized quantile residuals
    # 
    # m1 <- sdmTMB(
    #   data = dd,
    #   formula = density ~ 0 + year_f + depth_sc,
    #   mesh = mesh,
    #   family = tweedie(),
    #   spatial = "on",
    #   time = "year",
    #   spatiotemporal = "IID"
    #   )
    # 
    # dd$m1_resids <- residuals(m1) # randomized quantile residuals
    # 
    # dd2 <- dd %>%
    #   pivot_longer(c(m0_resids, m1_resids))
    #    
    # ggplot(dd2 %>% filter(value > -3 & value < 3), aes(X, Y, col = value)) +
    #   scale_colour_gradient2() +
    #   geom_point(size = 2) +
    #   facet_grid(name~year) +
    #   theme_minimal() +
    #   coord_fixed() + 
    #   theme(panel.grid.major = element_blank(),
    #         panel.grid.minor = element_blank(),
    #         panel.background = element_blank())
    #  
    # 
    # pred_fixed <- predict(m0)$est
    # s <- simulate(m0, nsim = 500)
    # r <- DHARMa::createDHARMa(
    #   simulatedResponse = s,
    #   observedResponse = dd$density,
    #   fittedPredictedResponse = pred_fixed
    #   )
    # plot(r)
    # DHARMa::testSpatialAutocorrelation(r, x = dd$X, y = dd$Y)
    # 
    # # In this case, it doesn't seem like a very clear spatial correlation
    # 
    # print(m1)
        

Binomial models

# Containers for per-category coefficient tables and annual predictions
data_list_coef <- list()
data_list_pred <- list()

# Year as a random effect because in some years we don't have any observations at all of presences

# For each rare litter category: fit a presence/absence model, run
# diagnostics, predict annual presence probability, and save model + figures
for(i in unique(binom$litter_category)) {
    
    # Subset to the current category; drop unused factor levels
    dd <- binom %>%
        filter(litter_category == i) %>%
        droplevels()
    
    # Binomial GLMM: scaled depth + random intercept by year; no spatial or
    # spatiotemporal random fields (too little signal for these categories).
    # NOTE(review): the offset is passed on the natural scale — sdmTMB adds
    # the offset on the link scale, so log(swept_area_km2) may have been
    # intended; confirm.
    # NOTE(review): the trailing comma after `control` leaves an empty
    # argument; R treats it as missing here, but it is fragile style.
    m <- sdmTMB(
      data = dd,
      formula = present ~ depth_sc + (1|year_f),
      offset = dd$swept_area_km2,
      mesh = mesh,
      family = binomial(link = "logit"),
      spatial = "off",
      time = "year",
      spatiotemporal = "off",
      control = sdmTMBcontrol(newton_loops = 1),
      )
    
    # Convergence / standard-error checks (printed, not stored)
    sanity(m)
    tidy(m, conf.int = TRUE)
    
    # Store the coefficient table, tagged with the model/category name
    data_list_coef[[i]] <- tidy(m, conf.int = TRUE) %>% mutate(model = paste("binom", i, sep = "_"))

    # Plot residuals
    # MCMC-based randomized quantile residuals (single short chain)
    mcmc_res <- residuals(m, type = "mle-mcmc", mcmc_iter = 201, mcmc_warmup = 200)
    qqnorm(mcmc_res, asp = 1, main = paste("Normal Q-Q Plot", i, sep = " "))
    qqline(mcmc_res)
    
    # Check for spatial autocorrelation
    # Simulation-based (DHARMa) residuals at the observed haul locations
    pred_fixed <- m$family$linkinv(predict(m)$est)
    s <- simulate(m, nsim = 500)
    r <- DHARMa::createDHARMa(
      simulatedResponse = s,
      observedResponse = dd$present,
      fittedPredictedResponse = pred_fixed
      )
    plot(r)
    DHARMa::testSpatialAutocorrelation(r, x = dd$X, y = dd$Y)
    
    # Plot conditional/marginal effects
    visreg(m, xvar = "year_f", scale = "response")
    
    # Prediction frame: mean depth, one row per year; X/Y are required
    # columns but unused here since spatial = "off"
    nd <- data.frame(
      depth_sc = mean(dd$depth_sc),
      year = unique(as.integer(as.character(dd$year_f)))) %>%
      mutate(year_f = as.factor(year),
             X = 0,
             Y = 0)

    # Link-scale predictions with SEs, including the year random intercepts
    p <- predict(m, newdata = nd, se_fit = TRUE, re_form = NULL)
    
    # Annual predicted presence probability with Wald 95% CI
    # (inv.logit from boot back-transforms from the logit scale)
    print(ggplot(p, aes(year, inv.logit(est),
                  ymin = inv.logit(est - 1.96 * est_se),
                  ymax = inv.logit(est + 1.96 * est_se)
                  )) +
      geom_line() +
      geom_ribbon(alpha = 0.4) +
      coord_cartesian(expand = F))
    
    data_list_pred[[i]] <- p %>% mutate(model = i)
    
    # Save model object
    saveRDS(m, paste("output/models/binom_", i, ".rds", sep = ""))
    
    # Overlay the annual mean of the raw presence data on the predictions
    print(ggplot(p, aes(year, y = inv.logit(est), ymax = inv.logit(est + 1.96*est_se), ymin = inv.logit(est - 1.96*est_se))) +
      geom_line() +
      geom_line(data = dd %>%
                  group_by(year) %>%
                  summarise(present = mean(present)),
                aes(year, present, color = "Data (mean)"), linetype = 2,
                inherit.aes = FALSE) + # Add data
      geom_ribbon(alpha = 0.2) +
      scale_color_brewer(palette = "Set1", name = "") +
      scale_x_continuous(breaks = scales::pretty_breaks(n = 10)) + 
      theme_plot() +
      xlab('Year') +
      ylab('Mean estimate') +
      NULL)
    
    # Saves the most recent plot (default device size, hence the 12 x 7.42 log)
    ggsave(paste("figures/supp/mean_pred_comp_binom_", i, ".png", sep = ""), dpi = 300)
    
}
#> filter: removed 2,436 rows (80%), 609 rows remaining
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> 
#> SAMPLING FOR MODEL 'tmb_generic' NOW (CHAIN 1).
#> Chain 1: 
#> Chain 1: Gradient evaluation took 0.000361 seconds
#> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 3.61 seconds.
#> Chain 1: Adjust your expectations accordingly!
#> Chain 1: 
#> Chain 1: 
#> Chain 1: Iteration:   1 / 201 [  0%]  (Warmup)
#> Chain 1: Iteration:  20 / 201 [  9%]  (Warmup)
#> Chain 1: Iteration:  40 / 201 [ 19%]  (Warmup)
#> Chain 1: Iteration:  60 / 201 [ 29%]  (Warmup)
#> Chain 1: Iteration:  80 / 201 [ 39%]  (Warmup)
#> Chain 1: Iteration: 100 / 201 [ 49%]  (Warmup)
#> Chain 1: Iteration: 120 / 201 [ 59%]  (Warmup)
#> Chain 1: Iteration: 140 / 201 [ 69%]  (Warmup)
#> Chain 1: Iteration: 160 / 201 [ 79%]  (Warmup)
#> Chain 1: Iteration: 180 / 201 [ 89%]  (Warmup)
#> Chain 1: Iteration: 200 / 201 [ 99%]  (Warmup)
#> Chain 1: Iteration: 201 / 201 [100%]  (Sampling)
#> Chain 1: 
#> Chain 1:  Elapsed Time: 0.58886 seconds (Warm-up)
#> Chain 1:                0.002745 seconds (Sampling)
#> Chain 1:                0.591605 seconds (Total)
#> Chain 1:

#> mutate: new variable 'year_f' (factor) with 10 unique values and 0% NA
#>         new variable 'X' (double) with one unique value and 0% NA
#>         new variable 'Y' (double) with one unique value and 0% NA

#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> group_by: one grouping variable (year)
#> summarise: now 10 rows and 2 columns, ungrouped

#> Saving 12 x 7.42 in image
#> filter: removed 2,436 rows (80%), 609 rows remaining
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> mutate: new variable 'model' (character) with one unique value and 0% NA

#> 
#> SAMPLING FOR MODEL 'tmb_generic' NOW (CHAIN 1).
#> Chain 1: 
#> Chain 1: Gradient evaluation took 0.000374 seconds
#> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 3.74 seconds.
#> Chain 1: Adjust your expectations accordingly!
#> Chain 1: 
#> Chain 1: 
#> Chain 1: Iteration:   1 / 201 [  0%]  (Warmup)
#> Chain 1: Iteration:  20 / 201 [  9%]  (Warmup)
#> Chain 1: Iteration:  40 / 201 [ 19%]  (Warmup)
#> Chain 1: Iteration:  60 / 201 [ 29%]  (Warmup)
#> Chain 1: Iteration:  80 / 201 [ 39%]  (Warmup)
#> Chain 1: Iteration: 100 / 201 [ 49%]  (Warmup)
#> Chain 1: Iteration: 120 / 201 [ 59%]  (Warmup)
#> Chain 1: Iteration: 140 / 201 [ 69%]  (Warmup)
#> Chain 1: Iteration: 160 / 201 [ 79%]  (Warmup)
#> Chain 1: Iteration: 180 / 201 [ 89%]  (Warmup)
#> Chain 1: Iteration: 200 / 201 [ 99%]  (Warmup)
#> Chain 1: Iteration: 201 / 201 [100%]  (Sampling)
#> Chain 1: 
#> Chain 1:  Elapsed Time: 0.616906 seconds (Warm-up)
#> Chain 1:                0.002811 seconds (Sampling)
#> Chain 1:                0.619717 seconds (Total)
#> Chain 1:

#> mutate: new variable 'year_f' (factor) with 10 unique values and 0% NA
#>         new variable 'X' (double) with one unique value and 0% NA
#>         new variable 'Y' (double) with one unique value and 0% NA

#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> group_by: one grouping variable (year)
#> summarise: now 10 rows and 2 columns, ungrouped

#> Saving 12 x 7.42 in image
#> filter: removed 2,436 rows (80%), 609 rows remaining
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✖ `ln_tau_G` standard error may be large
#> ℹ `ln_tau_G` is an internal parameter affecting `sigma_G`
#> ℹ `sigma_G` is the random intercept standard deviation
#> ℹ Try simplifying the model, adjusting the mesh, or adding priors
#> ✖ `sigma_G` is smaller than 0.01
#> ℹ Consider omitting this part of the model
#> mutate: new variable 'model' (character) with one unique value and 0% NA

#> 
#> SAMPLING FOR MODEL 'tmb_generic' NOW (CHAIN 1).
#> Chain 1: 
#> Chain 1: Gradient evaluation took 0.000396 seconds
#> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 3.96 seconds.
#> Chain 1: Adjust your expectations accordingly!
#> Chain 1: 
#> Chain 1: 
#> Chain 1: Iteration:   1 / 201 [  0%]  (Warmup)
#> Chain 1: Iteration:  20 / 201 [  9%]  (Warmup)
#> Chain 1: Iteration:  40 / 201 [ 19%]  (Warmup)
#> Chain 1: Iteration:  60 / 201 [ 29%]  (Warmup)
#> Chain 1: Iteration:  80 / 201 [ 39%]  (Warmup)
#> Chain 1: Iteration: 100 / 201 [ 49%]  (Warmup)
#> Chain 1: Iteration: 120 / 201 [ 59%]  (Warmup)
#> Chain 1: Iteration: 140 / 201 [ 69%]  (Warmup)
#> Chain 1: Iteration: 160 / 201 [ 79%]  (Warmup)
#> Chain 1: Iteration: 180 / 201 [ 89%]  (Warmup)
#> Chain 1: Iteration: 200 / 201 [ 99%]  (Warmup)
#> Chain 1: Iteration: 201 / 201 [100%]  (Sampling)
#> Chain 1: 
#> Chain 1:  Elapsed Time: 0.567071 seconds (Warm-up)
#> Chain 1:                0.003015 seconds (Sampling)
#> Chain 1:                0.570086 seconds (Total)
#> Chain 1:

#> mutate: new variable 'year_f' (factor) with 10 unique values and 0% NA
#>         new variable 'X' (double) with one unique value and 0% NA
#>         new variable 'Y' (double) with one unique value and 0% NA

#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> group_by: one grouping variable (year)
#> summarise: now 10 rows and 2 columns, ungrouped

#> Saving 12 x 7.42 in image
#> filter: removed 2,436 rows (80%), 609 rows remaining
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> mutate: new variable 'model' (character) with one unique value and 0% NA

#> 
#> SAMPLING FOR MODEL 'tmb_generic' NOW (CHAIN 1).
#> Chain 1: 
#> Chain 1: Gradient evaluation took 0.00039 seconds
#> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 3.9 seconds.
#> Chain 1: Adjust your expectations accordingly!
#> Chain 1: 
#> Chain 1: 
#> Chain 1: Iteration:   1 / 201 [  0%]  (Warmup)
#> Chain 1: Iteration:  20 / 201 [  9%]  (Warmup)
#> Chain 1: Iteration:  40 / 201 [ 19%]  (Warmup)
#> Chain 1: Iteration:  60 / 201 [ 29%]  (Warmup)
#> Chain 1: Iteration:  80 / 201 [ 39%]  (Warmup)
#> Chain 1: Iteration: 100 / 201 [ 49%]  (Warmup)
#> Chain 1: Iteration: 120 / 201 [ 59%]  (Warmup)
#> Chain 1: Iteration: 140 / 201 [ 69%]  (Warmup)
#> Chain 1: Iteration: 160 / 201 [ 79%]  (Warmup)
#> Chain 1: Iteration: 180 / 201 [ 89%]  (Warmup)
#> Chain 1: Iteration: 200 / 201 [ 99%]  (Warmup)
#> Chain 1: Iteration: 201 / 201 [100%]  (Sampling)
#> Chain 1: 
#> Chain 1:  Elapsed Time: 0.68506 seconds (Warm-up)
#> Chain 1:                0.003281 seconds (Sampling)
#> Chain 1:                0.688341 seconds (Total)
#> Chain 1:

#> mutate: new variable 'year_f' (factor) with 10 unique values and 0% NA
#>         new variable 'X' (double) with one unique value and 0% NA
#>         new variable 'Y' (double) with one unique value and 0% NA

#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> group_by: one grouping variable (year)
#> summarise: now 10 rows and 2 columns, ungrouped

#> Saving 12 x 7.42 in image
#> filter: removed 2,436 rows (80%), 609 rows remaining
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✖ `ln_tau_G` standard error may be large
#> ℹ `ln_tau_G` is an internal parameter affecting `sigma_G`
#> ℹ `sigma_G` is the random intercept standard deviation
#> ℹ Try simplifying the model, adjusting the mesh, or adding priors
#> ✖ `sigma_G` is smaller than 0.01
#> ℹ Consider omitting this part of the model
#> mutate: new variable 'model' (character) with one unique value and 0% NA

#> 
#> SAMPLING FOR MODEL 'tmb_generic' NOW (CHAIN 1).
#> Chain 1: 
#> Chain 1: Gradient evaluation took 0.000403 seconds
#> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 4.03 seconds.
#> Chain 1: Adjust your expectations accordingly!
#> Chain 1: 
#> Chain 1: 
#> Chain 1: Iteration:   1 / 201 [  0%]  (Warmup)
#> Chain 1: Iteration:  20 / 201 [  9%]  (Warmup)
#> Chain 1: Iteration:  40 / 201 [ 19%]  (Warmup)
#> Chain 1: Iteration:  60 / 201 [ 29%]  (Warmup)
#> Chain 1: Iteration:  80 / 201 [ 39%]  (Warmup)
#> Chain 1: Iteration: 100 / 201 [ 49%]  (Warmup)
#> Chain 1: Iteration: 120 / 201 [ 59%]  (Warmup)
#> Chain 1: Iteration: 140 / 201 [ 69%]  (Warmup)
#> Chain 1: Iteration: 160 / 201 [ 79%]  (Warmup)
#> Chain 1: Iteration: 180 / 201 [ 89%]  (Warmup)
#> Chain 1: Iteration: 200 / 201 [ 99%]  (Warmup)
#> Chain 1: Iteration: 201 / 201 [100%]  (Sampling)
#> Chain 1: 
#> Chain 1:  Elapsed Time: 0.61369 seconds (Warm-up)
#> Chain 1:                0.003314 seconds (Sampling)
#> Chain 1:                0.617004 seconds (Total)
#> Chain 1:

#> mutate: new variable 'year_f' (factor) with 10 unique values and 0% NA
#>         new variable 'X' (double) with one unique value and 0% NA
#>         new variable 'Y' (double) with one unique value and 0% NA

#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> group_by: one grouping variable (year)
#> summarise: now 10 rows and 2 columns, ungrouped

#> Saving 12 x 7.42 in image


# Save predictions and sims as data frames
# Combine the per-category lists into single data frames and write to disk
dat_coef_binom <- dplyr::bind_rows(data_list_coef)
dat_pred_binom <- dplyr::bind_rows(data_list_pred)

write_csv(dat_coef_binom, "output/dat_coef_binom.csv")
write_csv(dat_pred_binom, "output/dat_pred_binom.csv")

Poisson/negative binomial models

# Containers for per-category coefficients, grid predictions, and the
# simulation-based annual index (means only, and all draws)
data_list_coef <- list()
data_list_pred <- list()
data_list_sim <- list()
data_list_sims <- list()

# For each common litter category (plastic, sup, fishery): fit a count model,
# run diagnostics, predict on the grid, derive a simulation-based index, and
# save model + figures.
# NOTE(review): outputs are labelled "poisson"/"numbers" but the family is
# nbinom2 — consider renaming for clarity.
for(i in unique(num$litter_category)) {
    
    # Subset to the current category; drop unused factor levels
    dd <- num %>%
        filter(litter_category == i) %>%
        droplevels()
    
    # Negative binomial (nbinom2, log link) model: year as fixed effects
    # (no intercept, so each year gets its own level), scaled depth, a
    # spatial random field, no spatiotemporal field.
    # NOTE(review): as in the binomial loop, the offset is on the natural
    # scale — log(swept_area_km2) may have been intended; confirm.
    m <- sdmTMB(
      data = dd,
      formula = number ~ -1 + year_f + depth_sc,
      mesh = mesh,
      offset = dd$swept_area_km2,
      family = nbinom2(link = "log"),
      spatial = "on",
      time = "year",
      spatiotemporal = "off",
      control = sdmTMBcontrol(newton_loops = 1)
      )
    
    # Convergence / standard-error checks and coefficient table
    sanity(m)
    tidy(m, conf.int = TRUE)
    data_list_coef[[i]] <- tidy(m, conf.int = TRUE) %>% mutate(model = paste("poisson", i, sep = "_"))

    # Plot residuals
    # MCMC-based randomized quantile residuals (single short chain)
    mcmc_res <- residuals(m, type = "mle-mcmc", mcmc_iter = 201, mcmc_warmup = 200)
    qqnorm(mcmc_res, asp = 1, main = paste("Normal Q-Q Plot", i, sep = " "))
    qqline(mcmc_res)
    
    # Plot conditional/marginal effects
    visreg(m, xvar = "year_f", scale = "response")

    # Save model object
    saveRDS(m, paste("output/models/numbers_", i, ".rds", sep = ""))
    
    # Predict on grid
    pred <- predict(m, newdata = pred_grid_west) %>% 
      mutate(model = i)
    
    data_list_pred[[i]] <- pred
  
    # Get sims
    # Simulated (link-scale) predictions: rows = grid cell x year, cols = draws
    ncells <- filter(pred_grid_west, year == max(pred_grid_west$year)) %>% nrow()
    nsim <- 500
    sim <- predict(m, newdata = pred_grid_west, nsim = nsim)
    
    # Plot CV in space
    # Just plot last year
    sim_last <- sim[pred_grid_west$year == max(pred_grid_west$year), ]
    
    pred_last <- pred[pred$year == max(pred_grid_west$year), ]
    # CV across draws on the response scale (hence exp of link-scale sims)
    pred_last$cv <- round(apply(exp(sim_last), 1, function(x) sd(x) / mean(x)), 2)
    
    print(plot_map_west + 
      geom_raster(data = pred_last, aes(X, Y, fill = cv)) +
      scale_fill_viridis_c() + 
      geom_sf(size = 0.1) + 
      NULL)
    
    ggsave(paste("figures/supp/cv_numbers_", i, ".png", sep = ""))
    
    # Get index & full index (i.e. returning all sims)
    # area = 1/ncells weights each cell equally, so the index is a mean
    # across the grid rather than a total
    index_sim <- get_index_sims(sim,
                                area = rep(1/ncells, nrow(sim))) %>% mutate(model = i) # Note that we just get the means here
    
    data_list_sim[[i]] <- index_sim
    
    index_sim_full <- get_index_sims(sim,
                                     area = rep(1/ncells, nrow(sim)),
                                     return_sims = TRUE) %>% mutate(model = i) # Note that we just get the means here
    
    data_list_sims[[i]] <- index_sim_full
    
    
    # See how mean index compares to data
    
    print(ggplot(index_sim, aes(year, y = est, ymin = lwr, ymax = upr)) +
      geom_line() +
      geom_line(data = dd %>%
                  group_by(year) %>%
                  summarise(mean_number = mean(number)),
                aes(year, mean_number, color = "Data (mean)"), linetype = 2,
                inherit.aes = FALSE) + # Add data
      geom_ribbon(alpha = 0.2) +
      scale_color_brewer(palette = "Set1", name = "") +
      scale_x_continuous(breaks = scales::pretty_breaks(n = 10)) + 
      theme_plot() +
      xlab('Year') +
      ylab('Mean estimate') +
      NULL)
    
    ggsave(paste("figures/supp/mean_pred_comp_num_", i, ".png", sep = ""), dpi = 300)
    
}
#> filter: removed 1,218 rows (67%), 609 rows remaining
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> ✔ Range parameter doesn't look unreasonably large
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> 
#> SAMPLING FOR MODEL 'tmb_generic' NOW (CHAIN 1).
#> Chain 1: 
#> Chain 1: Gradient evaluation took 0.001637 seconds
#> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 16.37 seconds.
#> Chain 1: Adjust your expectations accordingly!
#> Chain 1: 
#> Chain 1: 
#> Chain 1: Iteration:   1 / 201 [  0%]  (Warmup)
#> Chain 1: Iteration:  20 / 201 [  9%]  (Warmup)
#> Chain 1: Iteration:  40 / 201 [ 19%]  (Warmup)
#> Chain 1: Iteration:  60 / 201 [ 29%]  (Warmup)
#> Chain 1: Iteration:  80 / 201 [ 39%]  (Warmup)
#> Chain 1: Iteration: 100 / 201 [ 49%]  (Warmup)
#> Chain 1: Iteration: 120 / 201 [ 59%]  (Warmup)
#> Chain 1: Iteration: 140 / 201 [ 69%]  (Warmup)
#> Chain 1: Iteration: 160 / 201 [ 79%]  (Warmup)
#> Chain 1: Iteration: 180 / 201 [ 89%]  (Warmup)
#> Chain 1: Iteration: 200 / 201 [ 99%]  (Warmup)
#> Chain 1: Iteration: 201 / 201 [100%]  (Sampling)
#> Chain 1: 
#> Chain 1:  Elapsed Time: 2.37481 seconds (Warm-up)
#> Chain 1:                0.011118 seconds (Sampling)
#> Chain 1:                2.38593 seconds (Total)
#> Chain 1:

#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> filter: removed 38,322 rows (90%), 4,258 rows remaining

#> Saving 12 x 7.42 in image
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> 
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> 
#> group_by: one grouping variable (year)
#> 
#> summarise: now 10 rows and 2 columns, ungrouped

#> Saving 12 x 7.42 in image
#> filter: removed 1,218 rows (67%), 609 rows remaining
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> ✔ Range parameter doesn't look unreasonably large
#> mutate: new variable 'model' (character) with one unique value and 0% NA

#> 
#> SAMPLING FOR MODEL 'tmb_generic' NOW (CHAIN 1).
#> Chain 1: 
#> Chain 1: Gradient evaluation took 0.000591 seconds
#> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 5.91 seconds.
#> Chain 1: Adjust your expectations accordingly!
#> Chain 1: 
#> Chain 1: 
#> Chain 1: Iteration:   1 / 201 [  0%]  (Warmup)
#> Chain 1: Iteration:  20 / 201 [  9%]  (Warmup)
#> Chain 1: Iteration:  40 / 201 [ 19%]  (Warmup)
#> Chain 1: Iteration:  60 / 201 [ 29%]  (Warmup)
#> Chain 1: Iteration:  80 / 201 [ 39%]  (Warmup)
#> Chain 1: Iteration: 100 / 201 [ 49%]  (Warmup)
#> Chain 1: Iteration: 120 / 201 [ 59%]  (Warmup)
#> Chain 1: Iteration: 140 / 201 [ 69%]  (Warmup)
#> Chain 1: Iteration: 160 / 201 [ 79%]  (Warmup)
#> Chain 1: Iteration: 180 / 201 [ 89%]  (Warmup)
#> Chain 1: Iteration: 200 / 201 [ 99%]  (Warmup)
#> Chain 1: Iteration: 201 / 201 [100%]  (Sampling)
#> Chain 1: 
#> Chain 1:  Elapsed Time: 1.77236 seconds (Warm-up)
#> Chain 1:                0.008238 seconds (Sampling)
#> Chain 1:                1.7806 seconds (Total)
#> Chain 1:

#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> filter: removed 38,322 rows (90%), 4,258 rows remaining

#> Saving 12 x 7.42 in image
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.mutate: new variable 'model' (character) with one unique value and 0% NA
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.mutate: new variable 'model' (character) with one unique value and 0% NA
#> group_by: one grouping variable (year)
#> summarise: now 10 rows and 2 columns, ungrouped

#> Saving 12 x 7.42 in image
#> filter: removed 1,218 rows (67%), 609 rows remaining
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> ✔ Range parameter doesn't look unreasonably large
#> mutate: new variable 'model' (character) with one unique value and 0% NA

#> 
#> SAMPLING FOR MODEL 'tmb_generic' NOW (CHAIN 1).
#> Chain 1: 
#> Chain 1: Gradient evaluation took 0.000758 seconds
#> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 7.58 seconds.
#> Chain 1: Adjust your expectations accordingly!
#> Chain 1: 
#> Chain 1: 
#> Chain 1: Iteration:   1 / 201 [  0%]  (Warmup)
#> Chain 1: Iteration:  20 / 201 [  9%]  (Warmup)
#> Chain 1: Iteration:  40 / 201 [ 19%]  (Warmup)
#> Chain 1: Iteration:  60 / 201 [ 29%]  (Warmup)
#> Chain 1: Iteration:  80 / 201 [ 39%]  (Warmup)
#> Chain 1: Iteration: 100 / 201 [ 49%]  (Warmup)
#> Chain 1: Iteration: 120 / 201 [ 59%]  (Warmup)
#> Chain 1: Iteration: 140 / 201 [ 69%]  (Warmup)
#> Chain 1: Iteration: 160 / 201 [ 79%]  (Warmup)
#> Chain 1: Iteration: 180 / 201 [ 89%]  (Warmup)
#> Chain 1: Iteration: 200 / 201 [ 99%]  (Warmup)
#> Chain 1: Iteration: 201 / 201 [100%]  (Sampling)
#> Chain 1: 
#> Chain 1:  Elapsed Time: 2.2107 seconds (Warm-up)
#> Chain 1:                0.009133 seconds (Sampling)
#> Chain 1:                2.21983 seconds (Total)
#> Chain 1:

#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> filter: removed 38,322 rows (90%), 4,258 rows remaining

#> Saving 12 x 7.42 in image
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.mutate: new variable 'model' (character) with one unique value and 0% NA
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.mutate: new variable 'model' (character) with one unique value and 0% NA
#> group_by: one grouping variable (year)
#> summarise: now 10 rows and 2 columns, ungrouped

#> Saving 12 x 7.42 in image


# Collapse the per-category result lists into single data frames and
# write each one to CSV (numeric / count models).
dat_coef_num <- dplyr::bind_rows(data_list_coef)
write_csv(dat_coef_num, "output/dat_coef_num.csv")

dat_pred_num <- dplyr::bind_rows(data_list_pred)
write_csv(dat_pred_num, "output/dat_pred_num.csv")

dat_sim_num <- dplyr::bind_rows(data_list_sim)
write_csv(dat_sim_num, "output/dat_sim_num.csv")

dat_sims_num <- dplyr::bind_rows(data_list_sims)
write_csv(dat_sims_num, "output/dat_sims_num.csv")

Fit tweedie biomass density models

# Fit one Tweedie biomass-density model per litter category, run sanity
# checks and MCMC residual diagnostics, predict on the prediction grid,
# and accumulate coefficients, predictions and simulation-based indices.
data_list_coef <- list()
data_list_pred <- list()
data_list_sim <- list()
data_list_sims <- list()

for (i in unique(biom$litter_category)) {

    dd <- biom %>%
        filter(litter_category == i) %>%
        droplevels()

    # Spatial-only model (no spatiotemporal field); year as factor,
    # scaled depth as covariate
    m <- sdmTMB(
      data = dd,
      formula = density ~ 0 + year_f + depth_sc,
      mesh = mesh,
      family = tweedie(),
      spatial = "on",
      time = "year",
      spatiotemporal = "off"
      )

    sanity(m)
    data_list_coef[[i]] <- tidy(m, conf.int = TRUE) %>%
      mutate(model = paste0("tweedie_", i))

    # Plot randomized-quantile (MLE-MCMC) residuals
    mcmc_res <- residuals(m, type = "mle-mcmc", mcmc_iter = 201, mcmc_warmup = 200)
    qqnorm(mcmc_res, asp = 1, main = paste("Normal Q-Q Plot", i))
    qqline(mcmc_res)

    # Save model object
    saveRDS(m, paste0("output/models/biomass_", i, ".rds"))

    # Predict on grid
    pred <- predict(m, newdata = pred_grid_west) %>%
      mutate(model = i)

    data_list_pred[[i]] <- pred

    # Simulate from the joint precision matrix to propagate uncertainty
    nsim <- 500
    sim <- predict(m, newdata = pred_grid_west, nsim = nsim)

    # Plot CV in space; just the last year
    last_year <- max(pred_grid_west$year)
    sim_last <- sim[pred_grid_west$year == last_year, ]

    pred_last <- pred[pred$year == last_year, ]
    # sims are on the link (log) scale, hence exp() before computing CV
    pred_last$cv <- round(apply(exp(sim_last), 1, function(x) sd(x) / mean(x)), 2)

    print(plot_map_west +
      geom_raster(data = pred_last, aes(X, Y, fill = cv)) +
      scale_fill_viridis_c() +
      geom_sf(size = 0.1) +
      NULL)

    ggsave(paste0("figures/supp/cv_biomass_", i, ".png"))

    # Get index & full index (i.e. returning all sims); area is the
    # 2 km x 2 km grid-cell area
    index_sim <- get_index_sims(sim,
                                area = rep(2*2, nrow(sim))) %>% mutate(model = i)

    data_list_sim[[i]] <- index_sim

    index_sim_full <- get_index_sims(sim,
                                     area = rep(2*2, nrow(sim)),
                                     return_sims = TRUE) %>% mutate(model = i)

    data_list_sims[[i]] <- index_sim_full


    # See how the mean index (area weights summing to 1) compares to the
    # raw data means
    ncells <- filter(pred_grid_west, year == last_year) %>% nrow()

    index_sim_avg <- get_index_sims(sim, area = rep(1/ncells, nrow(sim)))

    print(ggplot(index_sim_avg, aes(year, y = est, ymin = lwr, ymax = upr)) +
      geom_line() +
      geom_line(data = dd %>%
                  group_by(year) %>%
                  summarise(mean_density = mean(density)),
                aes(year, mean_density, color = "Data (mean)"), linetype = 2,
                inherit.aes = FALSE) + # Add data
      #geom_ribbon(alpha = 0.2) +
      scale_color_brewer(palette = "Set1", name = "") +
      scale_x_continuous(breaks = scales::pretty_breaks(n = 10)) +
      theme_plot() +
      xlab('Year') +
      ylab('Mean biomass estimate (kg)') +
      NULL)

    ggsave(paste0("figures/supp/mean_pred_comp_biomass_", i, ".png"), dpi = 300)

}
#> filter: removed 1,218 rows (67%), 609 rows remaining
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> ✔ Range parameter doesn't look unreasonably large
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> 
#> SAMPLING FOR MODEL 'tmb_generic' NOW (CHAIN 1).
#> Chain 1: 
#> Chain 1: Gradient evaluation took 0.000175 seconds
#> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 1.75 seconds.
#> Chain 1: Adjust your expectations accordingly!
#> Chain 1: 
#> Chain 1: 
#> Chain 1: Iteration:   1 / 201 [  0%]  (Warmup)
#> Chain 1: Iteration:  20 / 201 [  9%]  (Warmup)
#> Chain 1: Iteration:  40 / 201 [ 19%]  (Warmup)
#> Chain 1: Iteration:  60 / 201 [ 29%]  (Warmup)
#> Chain 1: Iteration:  80 / 201 [ 39%]  (Warmup)
#> Chain 1: Iteration: 100 / 201 [ 49%]  (Warmup)
#> Chain 1: Iteration: 120 / 201 [ 59%]  (Warmup)
#> Chain 1: Iteration: 140 / 201 [ 69%]  (Warmup)
#> Chain 1: Iteration: 160 / 201 [ 79%]  (Warmup)
#> Chain 1: Iteration: 180 / 201 [ 89%]  (Warmup)
#> Chain 1: Iteration: 200 / 201 [ 99%]  (Warmup)
#> Chain 1: Iteration: 201 / 201 [100%]  (Sampling)
#> Chain 1: 
#> Chain 1:  Elapsed Time: 0.559347 seconds (Warm-up)
#> Chain 1:                0.002477 seconds (Sampling)
#> Chain 1:                0.561824 seconds (Total)
#> Chain 1:
#> mutate: new variable 'model' (character) with one unique value and 0% NA

#> Saving 12 x 7.42 in image
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> 
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> 
#> filter: removed 38,322 rows (90%), 4,258 rows remaining
#> 
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.
#> group_by: one grouping variable (year)
#> 
#> summarise: now 10 rows and 2 columns, ungrouped

#> Saving 12 x 7.42 in image
#> filter: removed 1,218 rows (67%), 609 rows remaining
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> ✔ Range parameter doesn't look unreasonably large
#> mutate: new variable 'model' (character) with one unique value and 0% NA

#> 
#> SAMPLING FOR MODEL 'tmb_generic' NOW (CHAIN 1).
#> Chain 1: 
#> Chain 1: Gradient evaluation took 0.000165 seconds
#> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 1.65 seconds.
#> Chain 1: Adjust your expectations accordingly!
#> Chain 1: 
#> Chain 1: 
#> Chain 1: Iteration:   1 / 201 [  0%]  (Warmup)
#> Chain 1: Iteration:  20 / 201 [  9%]  (Warmup)
#> Chain 1: Iteration:  40 / 201 [ 19%]  (Warmup)
#> Chain 1: Iteration:  60 / 201 [ 29%]  (Warmup)
#> Chain 1: Iteration:  80 / 201 [ 39%]  (Warmup)
#> Chain 1: Iteration: 100 / 201 [ 49%]  (Warmup)
#> Chain 1: Iteration: 120 / 201 [ 59%]  (Warmup)
#> Chain 1: Iteration: 140 / 201 [ 69%]  (Warmup)
#> Chain 1: Iteration: 160 / 201 [ 79%]  (Warmup)
#> Chain 1: Iteration: 180 / 201 [ 89%]  (Warmup)
#> Chain 1: Iteration: 200 / 201 [ 99%]  (Warmup)
#> Chain 1: Iteration: 201 / 201 [100%]  (Sampling)
#> Chain 1: 
#> Chain 1:  Elapsed Time: 0.585495 seconds (Warm-up)
#> Chain 1:                0.00258 seconds (Sampling)
#> Chain 1:                0.588075 seconds (Total)
#> Chain 1:
#> mutate: new variable 'model' (character) with one unique value and 0% NA

#> Saving 12 x 7.42 in image
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.mutate: new variable 'model' (character) with one unique value and 0% NA
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.mutate: new variable 'model' (character) with one unique value and 0% NA
#> filter: removed 38,322 rows (90%), 4,258 rows remaining
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.group_by: one grouping variable (year)
#> summarise: now 10 rows and 2 columns, ungrouped

#> Saving 12 x 7.42 in image
#> filter: removed 1,218 rows (67%), 609 rows remaining
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> ✔ Range parameter doesn't look unreasonably large
#> mutate: new variable 'model' (character) with one unique value and 0% NA

#> 
#> SAMPLING FOR MODEL 'tmb_generic' NOW (CHAIN 1).
#> Chain 1: 
#> Chain 1: Gradient evaluation took 0.000173 seconds
#> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 1.73 seconds.
#> Chain 1: Adjust your expectations accordingly!
#> Chain 1: 
#> Chain 1: 
#> Chain 1: Iteration:   1 / 201 [  0%]  (Warmup)
#> Chain 1: Iteration:  20 / 201 [  9%]  (Warmup)
#> Chain 1: Iteration:  40 / 201 [ 19%]  (Warmup)
#> Chain 1: Iteration:  60 / 201 [ 29%]  (Warmup)
#> Chain 1: Iteration:  80 / 201 [ 39%]  (Warmup)
#> Chain 1: Iteration: 100 / 201 [ 49%]  (Warmup)
#> Chain 1: Iteration: 120 / 201 [ 59%]  (Warmup)
#> Chain 1: Iteration: 140 / 201 [ 69%]  (Warmup)
#> Chain 1: Iteration: 160 / 201 [ 79%]  (Warmup)
#> Chain 1: Iteration: 180 / 201 [ 89%]  (Warmup)
#> Chain 1: Iteration: 200 / 201 [ 99%]  (Warmup)
#> Chain 1: Iteration: 201 / 201 [100%]  (Sampling)
#> Chain 1: 
#> Chain 1:  Elapsed Time: 0.670873 seconds (Warm-up)
#> Chain 1:                0.002621 seconds (Sampling)
#> Chain 1:                0.673494 seconds (Total)
#> Chain 1:
#> mutate: new variable 'model' (character) with one unique value and 0% NA

#> Saving 12 x 7.42 in image
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.mutate: new variable 'model' (character) with one unique value and 0% NA
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.mutate: new variable 'model' (character) with one unique value and 0% NA
#> filter: removed 38,322 rows (90%), 4,258 rows remaining
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.group_by: one grouping variable (year)
#> summarise: now 10 rows and 2 columns, ungrouped

#> Saving 12 x 7.42 in image


# Collapse the per-category result lists into single data frames and
# write each one to CSV (biomass models).
dat_coef_biom <- dplyr::bind_rows(data_list_coef)
write_csv(dat_coef_biom, "output/dat_coef_biomass.csv")

dat_pred_biom <- dplyr::bind_rows(data_list_pred)
write_csv(dat_pred_biom, "output/dat_pred_biomass.csv")

dat_sim_biom <- dplyr::bind_rows(data_list_sim)
write_csv(dat_sim_biom, "output/dat_sim_biomass.csv")

dat_sims_biom <- dplyr::bind_rows(data_list_sims)
write_csv(dat_sims_biom, "output/dat_sims_biomass.csv")